++rpg;
if (i == num_pg) break;
- if (((ulong)&entry[i]) % CACHE_LINE_SIZE == 0) {
+ if ((((ulong)&entry[i]) % cpu_caches.dline_size) == 0) {
last_flush = (ulong)&entry[i - 1];
dcbst(last_flush);
}
++i;
if (i == num_pg) break;
- if (((ulong)&entry[i]) % CACHE_LINE_SIZE == 0) {
+ if ((((ulong)&entry[i]) % cpu_caches.dline_size) == 0) {
last_flush = (ulong)&entry[i - 1];
dcbst(last_flush);
}
/* Linux uses a dummy page, filling "empty" DART entries with a
   reference to this page to capture stray DMAs. */
dummy_page = (ulong)alloc_xenheap_pages(1);
- memset((void *)dummy_page, 0, PAGE_SIZE);
+ clear_page((void *)dummy_page);
dummy_page >>= PAGE_SHIFT;
printk("Initializing DART 0x%lx: tbl: %p[0x%lx] entries: 0x%lx\n",
#undef DEBUG
#undef SERIALIZE
+/* Global L1 cache geometry used by cache-maintenance code (e.g. the DART
+   flush loop above, which steps by cpu_caches.dline_size).  Defaults
+   assume 128-byte (0x80) cache lines, i.e. log2(line size) == 7, for
+   both the d-cache and the i-cache.
+   NOTE(review): dsize/isize are left zero here; presumably these
+   defaults are replaced once the per-CPU cache geometry is probed --
+   confirm against the arch setup code. */
+struct cpu_caches cpu_caches = {
+ .dline_size = 0x80, /* 128-byte d-cache line */
+ .log_dline_size = 7, /* log2(dline_size) */
+ .dlines_per_page = PAGE_SIZE >> 7, /* PAGE_SIZE / dline_size */
+ .iline_size = 0x80, /* 128-byte i-cache line */
+ .log_iline_size = 7, /* log2(iline_size) */
+ .ilines_per_page = PAGE_SIZE >> 7, /* PAGE_SIZE / iline_size */
+};
+
struct rma_settings {
int order;
int rmlr_0;
#define __read_mostly
+/* L1 cache geometry.  Sizes and line sizes are in bytes; the log_*
+   fields hold log2 of the corresponding line size, and *lines_per_page
+   give the number of cache lines covering one page. */
+struct cpu_caches {
+ u32 dsize; /* L1 d-cache size */
+ u32 dline_size; /* L1 d-cache line size */
+ u32 log_dline_size; /* log2(dline_size) */
+ u32 dlines_per_page; /* PAGE_SIZE / dline_size */
+ u32 isize; /* L1 i-cache size */
+ u32 iline_size; /* L1 i-cache line size */
+ u32 log_iline_size; /* log2(iline_size) */
+ u32 ilines_per_page; /* PAGE_SIZE / iline_size */
+};
+/* Single shared instance; defined (with boot-time defaults) elsewhere
+   in this patch. */
+extern struct cpu_caches cpu_caches;
#endif
#define ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
-/* this should be per processor, but for now */
-#define CACHE_LINE_SIZE 128
-
/* 256M - 64M of Xen space seems like a nice number */
#define CONFIG_MIN_DOM0_PAGES (192 << (20 - PAGE_SHIFT))
#define CONFIG_SHADOW 1